From 2e4b001df0b4e2ef1bf611c330aeef2964436a07 Mon Sep 17 00:00:00 2001
From: "kaf24@scramble.cl.cam.ac.uk" <kaf24@scramble.cl.cam.ac.uk>
Date: Tue, 27 Jul 2004 19:37:36 +0000
Subject: [PATCH] bitkeeper revision 1.1108.26.1 (4106af006Bpl_nN84MShtvHA51xG0Q)

Domain allocations use the buddy allocator.
---
 xen/arch/x86/domain.c        |   5 +
 xen/arch/x86/shadow.c        |  18 +--
 xen/common/dom0_ops.c        | 113 +++++++------------
 xen/common/dom_mem_ops.c     |  11 --
 xen/common/domain.c          |  28 +----
 xen/common/event_channel.c   |   1 -
 xen/common/kernel.c          |  16 +--
 xen/common/memory.c          |  25 +----
 xen/common/page_alloc.c      | 207 ++++++++++++++++++++++++-----------
 xen/include/asm-x86/config.h |   6 -
 xen/include/asm-x86/domain.h |   5 +-
 xen/include/asm-x86/mm.h     |  18 ++-
 xen/include/asm-x86/page.h   |   1 +
 xen/include/xen/mm.h         |  21 +++-
 14 files changed, 232 insertions(+), 243 deletions(-)

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index b189cc7732..73a838d6fb 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -210,6 +210,11 @@ void machine_halt(void)
     __machine_halt(NULL);
 }
 
+void free_perdomain_pt(struct domain *d)
+{
+    free_xenheap_page((unsigned long)d->mm.perdomain_pt);
+}
+
 void arch_do_createdomain(struct domain *d)
 {
     d->shared_info = (void *)alloc_xenheap_page();
diff --git a/xen/arch/x86/shadow.c b/xen/arch/x86/shadow.c
index 57cd421daa..a98cf3076c 100644
--- a/xen/arch/x86/shadow.c
+++ b/xen/arch/x86/shadow.c
@@ -64,11 +64,10 @@
 interrupts enabled. Nothing can go wrong ;-)
 **/
 
-static inline void free_shadow_page( struct mm_struct *m,
-                                     struct pfn_info *pfn_info )
+static inline void free_shadow_page(struct mm_struct *m,
+                                    struct pfn_info *page)
 {
-    unsigned long flags;
-    unsigned long type = pfn_info->u.inuse.type_info & PGT_type_mask;
+    unsigned long type = page->u.inuse.type_info & PGT_type_mask;
 
     m->shadow_page_count--;
 
@@ -77,14 +76,9 @@ static inline void free_shadow_page( struct mm_struct *m,
     else if (type == PGT_l2_page_table) perfc_decr(shadow_l2_pages);
 
     else printk("Free shadow weird page type pfn=%08x type=%08x\n",
-                frame_table-pfn_info, pfn_info->u.inuse.type_info);
+                frame_table-page, page->u.inuse.type_info);
 
-    pfn_info->u.inuse.type_info = 0;
-
-    spin_lock_irqsave(&free_list_lock, flags);
-    list_add(&pfn_info->list, &free_list);
-    free_pfns++;
-    spin_unlock_irqrestore(&free_list_lock, flags);
+    free_domheap_page(page);
 }
 
 static void __free_shadow_table( struct mm_struct *m )
@@ -518,7 +512,7 @@ int shadow_mode_control(struct domain *d, dom0_shadow_control_t *sc)
 static inline struct pfn_info *alloc_shadow_page(struct mm_struct *m)
 {
     m->shadow_page_count++;
-    return alloc_domain_page(NULL);
+    return alloc_domheap_page();
 }
 
 void unshadow_table( unsigned long gpfn, unsigned int type )
diff --git a/xen/common/dom0_ops.c b/xen/common/dom0_ops.c
index 389cfee61a..8886878b07 100644
--- a/xen/common/dom0_ops.c
+++ b/xen/common/dom0_ops.c
@@ -29,16 +29,16 @@ extern void arch_getdomaininfo_ctxt(struct domain *,
                                     full_execution_context_t *);
 
 static inline int is_free_domid(domid_t dom)
 {
-    struct domain *d;
+    struct domain *d;
 
-    if (dom >= DOMID_SELF) return 0;
-    d = find_domain_by_id(dom);
-    if (d == NULL) {
-        return 1;
-    } else {
-        put_domain(d);
+    if ( dom >= DOMID_SELF )
         return 0;
-    }
+
+    if ( (d = find_domain_by_id(dom)) == NULL )
+        return 1;
+
+    put_domain(d);
+    return 0;
 }
 
 /** Allocate a free domain id. We try to reuse domain ids in a fairly low range,
@@ -52,73 +52,43 @@ static int allocate_domid(domid_t *pdom)
     static domid_t curdom = 0;
     static domid_t topdom = 101;
     int err = 0;
-    domid_t cur, dom, top;
+    domid_t dom;
 
-    /* Try to use a domain id in the range 0..topdom, starting at curdom. */
     spin_lock(&domid_lock);
-    cur = curdom;
-    dom = curdom;
-    top = topdom;
-    spin_unlock(&domid_lock);
-    do {
-        ++dom;
-        if (dom == top) {
+
+    /* Try to use a domain id in the range 0..topdom, starting at curdom. */
+    for ( dom = curdom + 1; dom != curdom; dom++ )
+    {
+        if ( dom == topdom )
             dom = 1;
-        }
-        if (is_free_domid(dom)) goto exit;
-    } while (dom != cur);
+        if ( is_free_domid(dom) )
+            goto exit;
+    }
+
     /* Couldn't find a free domain id in 0..topdom, try higher. */
-    for (dom = top; dom < DOMID_SELF; dom++) {
-        if(is_free_domid(dom)) goto exit;
+    for ( dom = topdom; dom < DOMID_SELF; dom++ )
+    {
+        if ( is_free_domid(dom) )
+        {
+            topdom = dom + 1;
+            goto exit;
+        }
     }
+
     /* No free domain ids. */
     err = -ENOMEM;
+
  exit:
-    if (err == 0) {
-        spin_lock(&domid_lock);
+    if ( err == 0 )
+    {
         curdom = dom;
-        if (dom >= topdom) {
-            topdom = dom + 1;
-        }
-        spin_unlock(&domid_lock);
         *pdom = dom;
     }
+
+    spin_unlock(&domid_lock);
+
     return err;
 }
 
-#if 0
-    struct domain *d;
-    static domid_t domnr = 0;
-    static spinlock_t domnr_lock = SPIN_LOCK_UNLOCKED;
-    unsigned int pro;
-    domid_t dom;
-
-    ret = -ENOMEM;
-
-    if(op->u.createdomain.domain > 0){
-        d = find_domain_by_id(dom);
-        if(d){
-            put_domain(d);
-            ret = -EINVAL;
-            break;
-        }
-    } else {
-        /* Search for an unused domain identifier. */
-        for ( ; ; )
-        {
-            spin_lock(&domnr_lock);
-            /* Wrap the roving counter when we reach first special value. */
-            if ( (dom = ++domnr) == DOMID_SELF )
-                dom = domnr = 1;
-            spin_unlock(&domnr_lock);
-
-            if ( (d = find_domain_by_id(dom)) == NULL )
-                break;
-            put_domain(d);
-        }
-    }
-#endif
-
 long do_dom0_op(dom0_op_t *u_dom0_op)
 {
     long ret = 0;
@@ -196,30 +166,23 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
         unsigned int pro;
         domid_t dom;
 
-        ret = -ENOMEM;
-
         dom = op->u.createdomain.domain;
-        if ( 0 < dom && dom < DOMID_SELF )
+        if ( (dom > 0) && (dom < DOMID_SELF) )
         {
+            ret = -EINVAL;
             if ( !is_free_domid(dom) )
-            {
-                ret = -EINVAL;
                 break;
-            }
-        }
-        else
-        {
-            ret = allocate_domid(&dom);
-            if ( ret ) break;
         }
+        else if ( (ret = allocate_domid(&dom)) != 0 )
+            break;
 
         if ( op->u.createdomain.cpu == -1 )
             pro = (unsigned int)dom % smp_num_cpus;
        else
             pro = op->u.createdomain.cpu % smp_num_cpus;
 
-        d = do_createdomain(dom, pro);
-        if ( d == NULL )
+        ret = -ENOMEM;
+        if ( (d = do_createdomain(dom, pro)) == NULL )
             break;
 
         if ( op->u.createdomain.name[0] )
@@ -527,7 +490,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
         pi->ht_per_core = ht;
         pi->cores       = smp_num_cpus / pi->ht_per_core;
         pi->total_pages = max_page;
-        pi->free_pages  = free_pfns;
+        pi->free_pages  = avail_domheap_pages();
         pi->cpu_khz     = cpu_khz;
 
         copy_to_user(u_dom0_op, op, sizeof(*op));
diff --git a/xen/common/dom_mem_ops.c b/xen/common/dom_mem_ops.c
index 8f8980599b..864ea3a4c1 100644
--- a/xen/common/dom_mem_ops.c
+++ b/xen/common/dom_mem_ops.c
@@ -22,19 +22,8 @@ static long alloc_dom_mem(struct domain *d,
     struct pfn_info *page;
     unsigned long    i;
 
-    /* Leave some slack pages; e.g., for the network. */
-    if ( unlikely(free_pfns < (nr_pages + (SLACK_DOMAIN_MEM_KILOBYTES >>
-                                           (PAGE_SHIFT-10)))) )
-    {
-        DPRINTK("Not enough slack: %u %u\n",
-                free_pfns,
-                SLACK_DOMAIN_MEM_KILOBYTES >> (PAGE_SHIFT-10));
-        return 0;
-    }
-
     for ( i = 0; i < nr_pages; i++ )
     {
-        /* NB. 'alloc_domain_page' does limit-checking on pages per domain. */
         if ( unlikely((page = alloc_domain_page(d)) == NULL) )
         {
             DPRINTK("Could not allocate a frame\n");
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 3df5013879..888c88f983 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -197,20 +197,12 @@ void domain_shutdown(u8 reason)
 struct pfn_info *alloc_domain_page(struct domain *d)
 {
     struct pfn_info *page = NULL;
-    unsigned long flags, mask, pfn_stamp, cpu_stamp;
+    unsigned long mask, pfn_stamp, cpu_stamp;
     int i;
 
     ASSERT(!in_irq());
 
-    spin_lock_irqsave(&free_list_lock, flags);
-    if ( likely(!list_empty(&free_list)) )
-    {
-        page = list_entry(free_list.next, struct pfn_info, list);
-        list_del(&page->list);
-        free_pfns--;
-    }
-    spin_unlock_irqrestore(&free_list_lock, flags);
-
+    page = alloc_domheap_page();
     if ( unlikely(page == NULL) )
         return NULL;
 
@@ -245,6 +237,7 @@ struct pfn_info *alloc_domain_page(struct domain *d)
             DPRINTK("Over-allocation for domain %u: %u >= %u\n",
                     d->domain, d->tot_pages, d->max_pages);
             spin_unlock(&d->page_alloc_lock);
+            page->u.inuse.domain = NULL;
             goto free_and_exit;
         }
         list_add_tail(&page->list, &d->page_list);
@@ -257,16 +250,12 @@ struct pfn_info *alloc_domain_page(struct domain *d)
     return page;
 
  free_and_exit:
-    spin_lock_irqsave(&free_list_lock, flags);
-    list_add(&page->list, &free_list);
-    free_pfns++;
-    spin_unlock_irqrestore(&free_list_lock, flags);
+    free_domheap_page(page);
     return NULL;
 }
 
 void free_domain_page(struct pfn_info *page)
 {
-    unsigned long flags;
     int drop_dom_ref;
     struct domain *d = page->u.inuse.domain;
 
@@ -289,10 +278,7 @@ void free_domain_page(struct pfn_info *page)
 
         page->u.inuse.count_info = 0;
 
-        spin_lock_irqsave(&free_list_lock, flags);
-        list_add(&page->list, &free_list);
-        free_pfns++;
-        spin_unlock_irqrestore(&free_list_lock, flags);
+        free_domheap_page(page);
     }
 
     if ( drop_dom_ref )
@@ -310,9 +296,7 @@ unsigned int alloc_new_dom_mem(struct domain *d, unsigned int kbytes)
 
     /* Grow the allocation if necessary. */
     for ( alloc_pfns = d->tot_pages; alloc_pfns < nr_pages; alloc_pfns++ )
     {
-        if ( unlikely((page=alloc_domain_page(d)) == NULL) ||
-             unlikely(free_pfns < (SLACK_DOMAIN_MEM_KILOBYTES >>
-                                   (PAGE_SHIFT-10))) )
+        if ( unlikely((page=alloc_domain_page(d)) == NULL) )
         {
             domain_relinquish_memory(d);
             return -ENOMEM;
diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
index c8a2560201..4df98a07d7 100644
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -197,7 +197,6 @@ static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
     if ( rc != 0 )
     {
         d->pirq_to_evtchn[pirq] = 0;
-        DPRINTK("Couldn't bind to PIRQ %d (error=%d)\n", pirq, rc);
         goto out;
     }
 
diff --git a/xen/common/kernel.c b/xen/common/kernel.c
index 489269f862..dd71dc092b 100644
--- a/xen/common/kernel.c
+++ b/xen/common/kernel.c
@@ -220,7 +220,7 @@ void cmain(multiboot_info_t *mbi)
         for ( ; ; ) ;
     }
 
-    ASSERT((sizeof(struct pfn_info) << 20) >
+    ASSERT((sizeof(struct pfn_info) << 20) <=
            (FRAMETABLE_VIRT_END - FRAMETABLE_VIRT_START));
     init_frametable((void *)FRAMETABLE_VIRT_START, max_page);
 
@@ -258,15 +258,15 @@ void cmain(multiboot_info_t *mbi)
            max_page >> (20-PAGE_SHIFT), max_page,
            max_mem  >> (20-PAGE_SHIFT));
 
-    add_to_domain_alloc_list(dom0_memory_end, max_page << PAGE_SHIFT);
-
     heap_start = memguard_init(&_end);
-
+    heap_start = __va(init_heap_allocator(__pa(heap_start), max_page));
+
+    init_xenheap_pages(__pa(heap_start), xenheap_phys_end);
     printk("Xen heap size is %luKB\n",
            (xenheap_phys_end-__pa(heap_start))/1024 );
 
-    init_page_allocator(__pa(heap_start), xenheap_phys_end);
-
+    init_domheap_pages(dom0_memory_end, max_page << PAGE_SHIFT);
+
     /* Initialise the slab allocator. */
     xmem_cache_init();
     xmem_cache_sizes_init(max_page);
@@ -307,8 +307,8 @@ void cmain(multiboot_info_t *mbi)
         panic("Could not set up DOM0 guest OS\n");
 
     /* The stash space for the initial kernel image can now be freed up. */
-    add_to_domain_alloc_list(__pa(frame_table) + frame_table_size,
-                             dom0_memory_start);
+    init_domheap_pages(__pa(frame_table) + frame_table_size,
+                       dom0_memory_start);
 
     init_trace_bufs();
 
diff --git a/xen/common/memory.c b/xen/common/memory.c
index 46b886e5ab..ec27c31327 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -37,10 +37,6 @@ struct pfn_info *frame_table;
 unsigned long frame_table_size;
 unsigned long max_page;
 
-struct list_head free_list;
-spinlock_t free_list_lock;
-unsigned int free_pfns;
-
 extern void init_percpu_info(void);
 
 void __init init_frametable(void *frametable_vstart, unsigned long nr_pages)
@@ -59,10 +55,6 @@ void __init init_frametable(void *frametable_vstart, unsigned long nr_pages)
 
     memset(frame_table, 0, frame_table_size);
 
-    spin_lock_init(&free_list_lock);
-    INIT_LIST_HEAD(&free_list);
-    free_pfns = 0;
-
     /* Initialise to a magic of 0x55555555 so easier to spot bugs later. */
     memset(machine_to_phys_mapping, 0x55, 4<<20);
 
@@ -72,22 +64,7 @@ void __init init_frametable(void *frametable_vstart, unsigned long nr_pages)
           mfn++ )
     {
         frame_table[mfn].u.inuse.count_info = 1 | PGC_allocated;
-        frame_table[mfn].u.inuse.type_info = 1 | PGT_gdt_page; /* non-RW type */
+        frame_table[mfn].u.inuse.type_info = 1 | PGT_gdt_page; /* non-RW */
         frame_table[mfn].u.inuse.domain = &idle0_task;
     }
 }
-
-
-void add_to_domain_alloc_list(unsigned long ps, unsigned long pe)
-{
-    unsigned long i;
-    unsigned long flags;
-
-    spin_lock_irqsave(&free_list_lock, flags);
-    for ( i = ps >> PAGE_SHIFT; i < (pe >> PAGE_SHIFT); i++ )
-    {
-        list_add_tail(&frame_table[i].list, &free_list);
-        free_pfns++;
-    }
-    spin_unlock_irqrestore(&free_list_lock, flags);
-}
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index b4c87223d7..065f85088d 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -28,8 +28,6 @@
 #include <xen/spinlock.h>
 #include <xen/slab.h>
 
-static spinlock_t alloc_lock = SPIN_LOCK_UNLOCKED;
-
 
 /*********************
  * ALLOCATION BITMAP
@@ -63,9 +61,6 @@ static void map_alloc(unsigned long first_page, unsigned long nr_pages)
         ASSERT(!allocated_in_map(first_page + i));
 #endif
 
-    memguard_unguard_range(phys_to_virt(first_page << PAGE_SHIFT),
-                           nr_pages << PAGE_SHIFT);
-
     curr_idx  = first_page / PAGES_PER_MAPWORD;
     start_off = first_page & (PAGES_PER_MAPWORD-1);
     end_idx   = (first_page + nr_pages) / PAGES_PER_MAPWORD;
@@ -95,9 +90,6 @@ static void map_free(unsigned long first_page, unsigned long nr_pages)
         ASSERT(allocated_in_map(first_page + i));
 #endif
 
-    memguard_guard_range(phys_to_virt(first_page << PAGE_SHIFT),
-                         nr_pages << PAGE_SHIFT);
-
     curr_idx  = first_page / PAGES_PER_MAPWORD;
     start_off = first_page & (PAGES_PER_MAPWORD-1);
     end_idx   = (first_page + nr_pages) / PAGES_PER_MAPWORD;
@@ -121,127 +113,138 @@ static void map_free(unsigned long first_page, unsigned long nr_pages)
  * BINARY BUDDY ALLOCATOR
  */
 
-/* Linked lists of free chunks of different powers-of-two in size. */
-#define NR_ORDERS 11 /* Up to 2^10 pages can be allocated at once. */
-static struct list_head free_head[NR_ORDERS];
+#define MEMZONE_XEN 0
+#define MEMZONE_DOM 1
+#define NR_ZONES    2
+
+/* Up to 2^10 pages can be allocated at once. */
+#define MIN_ORDER  0
+#define MAX_ORDER 10
+#define NR_ORDERS (MAX_ORDER - MIN_ORDER + 1)
+static struct list_head heap[NR_ZONES][NR_ORDERS];
+
+static unsigned long avail[NR_ZONES];
 
 #define round_pgdown(_p) ((_p)&PAGE_MASK)
 #define round_pgup(_p)   (((_p)+(PAGE_SIZE-1))&PAGE_MASK)
 
+static spinlock_t heap_lock = SPIN_LOCK_UNLOCKED;
 
-/*
- * Initialise allocator, placing addresses [@min,@max] in free pool.
- * @min and @max are PHYSICAL addresses.
- */
-void __init init_page_allocator(unsigned long min, unsigned long max)
+
+/* Initialise allocator to handle up to @max_pages. */
+unsigned long init_heap_allocator(
+    unsigned long bitmap_start, unsigned long max_pages)
 {
-    int i;
-    unsigned long range, bitmap_size;
-    struct pfn_info *pg;
+    int i, j;
+    unsigned long bitmap_size;
+
+    memset(avail, 0, sizeof(avail));
 
-    for ( i = 0; i < NR_ORDERS; i++ )
-        INIT_LIST_HEAD(&free_head[i]);
+    for ( i = 0; i < NR_ZONES; i++ )
+        for ( j = 0; j < NR_ORDERS; j++ )
+            INIT_LIST_HEAD(&heap[i][j]);
 
-    min = round_pgup (min);
-    max = round_pgdown(max);
+    bitmap_start = round_pgup(bitmap_start);
 
     /* Allocate space for the allocation bitmap. */
-    bitmap_size  = (max+1) >> (PAGE_SHIFT+3);
+    bitmap_size  = max_pages / 8;
     bitmap_size  = round_pgup(bitmap_size);
-    alloc_bitmap = (unsigned long *)phys_to_virt(min);
-    min         += bitmap_size;
-    range        = max - min;
+    alloc_bitmap = (unsigned long *)phys_to_virt(bitmap_start);
 
     /* All allocated by default. */
     memset(alloc_bitmap, ~0, bitmap_size);
+
+    return bitmap_start + bitmap_size;
+}
+
+/* Hand the specified arbitrary page range to the specified heap zone. */
+void init_heap_pages(int zone, struct pfn_info *pg, unsigned long nr_pages)
+{
+    int i;
+    unsigned long flags;
+
+    spin_lock_irqsave(&heap_lock, flags);
+
     /* Free up the memory we've been given to play with. */
-    map_free(min>>PAGE_SHIFT, range>>PAGE_SHIFT);
+    map_free(page_to_pfn(pg), nr_pages);
+    avail[zone] += nr_pages;
 
-    pg = &frame_table[min >> PAGE_SHIFT];
-    while ( range != 0 )
+    while ( nr_pages != 0 )
     {
         /*
          * Next chunk is limited by alignment of pg, but also must not be
          * bigger than remaining bytes.
         */
-        for ( i = 0; i < NR_ORDERS; i++ )
+        for ( i = 0; i < MAX_ORDER; i++ )
             if ( ((page_to_pfn(pg) & (1 << i)) != 0) ||
-                 ((1 << (i + PAGE_SHIFT + 1)) > range) )
+                 ((1 << (i + 1)) > nr_pages) )
                 break;
 
         PFN_ORDER(pg) = i;
-        list_add_tail(&pg->list, &free_head[i]);
+        list_add_tail(&pg->list, &heap[zone][i]);
 
-        pg    += 1 << i;
-        range -= 1 << (i + PAGE_SHIFT);
+        pg       += 1 << i;
+        nr_pages -= 1 << i;
     }
+
+    spin_unlock_irqrestore(&heap_lock, flags);
 }
 
-/* Allocate 2^@order contiguous pages. Returns a VIRTUAL address. */
-unsigned long alloc_xenheap_pages(int order)
+/* Allocate 2^@order contiguous pages. */
+struct pfn_info *alloc_heap_pages(int zone, int order)
 {
-    int i, attempts = 0;
+    int i;
     struct pfn_info *pg;
     unsigned long flags;
 
-retry:
-    spin_lock_irqsave(&alloc_lock, flags);
+    spin_lock_irqsave(&heap_lock, flags);
 
     /* Find smallest order which can satisfy the request. */
     for ( i = order; i < NR_ORDERS; i++ )
-        if ( !list_empty(&free_head[i]) )
+        if ( !list_empty(&heap[zone][i]) )
             break;
 
     if ( i == NR_ORDERS )
         goto no_memory;
 
-    pg = list_entry(free_head[i].next, struct pfn_info, list);
+    pg = list_entry(heap[zone][i].next, struct pfn_info, list);
     list_del(&pg->list);
 
     /* We may have to halve the chunk a number of times. */
     while ( i != order )
     {
         PFN_ORDER(pg) = --i;
-        list_add_tail(&pg->list, &free_head[i]);
+        list_add_tail(&pg->list, &heap[zone][i]);
         pg += 1 << i;
     }
 
-    map_alloc(page_to_pfn(pg), 1<<order);
+    map_alloc(page_to_pfn(pg), 1 << order);
+    avail[zone] -= 1 << order;
 
-    spin_unlock_irqrestore(&alloc_lock, flags);
+    spin_unlock_irqrestore(&heap_lock, flags);
 
-    return (unsigned long)page_to_virt(pg);
+    return pg;
 
  no_memory:
-    spin_unlock_irqrestore(&alloc_lock, flags);
-
-    if ( attempts++ < 8 )
-    {
-        xmem_cache_reap();
-        goto retry;
-    }
-
-    printk("Cannot handle page request order %d!\n", order);
-    dump_slabinfo();
-
-    return 0;
+    spin_unlock_irqrestore(&heap_lock, flags);
+    return NULL;
 }
 
-/* Free 2^@order pages at VIRTUAL address @p. */
-void free_xenheap_pages(unsigned long p, int order)
+/* Free 2^@order set of pages. */
+void free_heap_pages(int zone, struct pfn_info *pg, int order)
 {
-    struct pfn_info *pg = virt_to_page(p);
     unsigned long mask;
     unsigned long flags;
 
-    spin_lock_irqsave(&alloc_lock, flags);
+    spin_lock_irqsave(&heap_lock, flags);
 
-    map_free(page_to_pfn(pg), 1<<order);
+    map_free(page_to_pfn(pg), 1 << order);
+    avail[zone] += 1 << order;
 
     /* Merge chunks as far as possible. */
     while ( order < MAX_ORDER )
     {
         mask = 1 << order;
 
         if ( (page_to_pfn(pg) & mask) )
         {
             /* Merge with predecessor block? */
             if ( allocated_in_map(page_to_pfn(pg)-mask) ||
                  (PFN_ORDER(pg-mask) != order) )
                 break;
             list_del(&(pg-mask)->list);
             pg -= mask;
         }
         else
         {
             /* Merge with successor block? */
             if ( allocated_in_map(page_to_pfn(pg)+mask) ||
                  (PFN_ORDER(pg+mask) != order) )
                 break;
             list_del(&(pg+mask)->list);
         }
 
         order++;
     }
 
     PFN_ORDER(pg) = order;
-    list_add_tail(&pg->list, &free_head[order]);
+    list_add_tail(&pg->list, &heap[zone][order]);
+
+    spin_unlock_irqrestore(&heap_lock, flags);
+}
+
+
+
+/*************************
+ * XEN-HEAP SUB-ALLOCATOR
+ */
 
-    spin_unlock_irqrestore(&alloc_lock, flags);
+void init_xenheap_pages(unsigned long ps, unsigned long pe)
+{
+    ps = round_pgup(ps);
+    pe = round_pgdown(pe);
+    memguard_guard_range(__va(ps), pe - ps);
+    init_heap_pages(MEMZONE_XEN, phys_to_page(ps), (pe - ps) >> PAGE_SHIFT);
+}
+
+unsigned long alloc_xenheap_pages(int order)
+{
+    struct pfn_info *pg;
+    int attempts = 0;
+
+ retry:
+    if ( unlikely((pg = alloc_heap_pages(MEMZONE_XEN, order)) == NULL) )
+        goto no_memory;
+    memguard_unguard_range(page_to_virt(pg), 1 << (order + PAGE_SHIFT));
+    return (unsigned long)page_to_virt(pg);
+
+ no_memory:
+    if ( attempts++ < 8 )
+    {
+        xmem_cache_reap();
+        goto retry;
+    }
+
+    printk("Cannot handle page request order %d!\n", order);
+    dump_slabinfo();
+    return 0;
+}
+
+void free_xenheap_pages(unsigned long p, int order)
+{
+    memguard_guard_range((void *)p, 1 << (order + PAGE_SHIFT));
+    free_heap_pages(MEMZONE_XEN, virt_to_page(p), order);
+}
+
+
+
+/*************************
+ * DOMAIN-HEAP SUB-ALLOCATOR
+ */
+
+void init_domheap_pages(unsigned long ps, unsigned long pe)
+{
+    ps = round_pgup(ps);
+    pe = round_pgdown(pe);
+    init_heap_pages(MEMZONE_DOM, phys_to_page(ps), (pe - ps) >> PAGE_SHIFT);
+}
+
+struct pfn_info *alloc_domheap_pages(int order)
+{
+    struct pfn_info *pg = alloc_heap_pages(MEMZONE_DOM, order);
+    return pg;
+}
+
+void free_domheap_pages(struct pfn_info *pg, int order)
+{
+    free_heap_pages(MEMZONE_DOM, pg, order);
+}
+
+unsigned long avail_domheap_pages(void)
+{
+    return avail[MEMZONE_DOM];
 }
diff --git a/xen/include/asm-x86/config.h b/xen/include/asm-x86/config.h
index 496ebad1c2..a43afad709 100644
--- a/xen/include/asm-x86/config.h
+++ b/xen/include/asm-x86/config.h
@@ -58,12 +58,6 @@
 #define __cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
 #define ____cacheline_aligned __cacheline_aligned
 
-/*
- * Amount of slack domain memory to leave in system, in megabytes.
- * Prevents a hard out-of-memory crunch for things like network receive.
- */
-#define SLACK_DOMAIN_MEM_KILOBYTES 2048
-
 /* Linkage for x86 */
 #define asmlinkage __attribute__((regparm(0)))
 #define __ALIGN .align 16,0x90
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 061c205438..8d5942cdcb 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -7,10 +7,7 @@ extern void arch_do_createdomain(struct domain *d);
 extern void arch_final_setup_guestos(
     struct domain *d, full_execution_context_t *c);
 
-static inline void free_perdomain_pt(struct domain *d)
-{
-    free_xenheap_page((unsigned long)d->mm.perdomain_pt);
-}
+extern void free_perdomain_pt(struct domain *d);
 
 extern void domain_relinquish_memory(struct domain *d);
 
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index e68ff41d3d..7f9c57cf62 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -97,13 +97,13 @@ struct pfn_info
 
 #define SHARE_PFN_WITH_DOMAIN(_pfn, _dom)                                    \
     do {                                                                     \
-        (_pfn)->u.inuse.domain = (_dom);                                    \
+        (_pfn)->u.inuse.domain = (_dom);                                     \
         /* The incremented type count is intended to pin to 'writeable'. */ \
-        (_pfn)->u.inuse.type_info  = PGT_writeable_page | PGT_validated | 1; \
+        (_pfn)->u.inuse.type_info = PGT_writeable_page | PGT_validated | 1;\
         wmb(); /* install valid domain ptr before updating refcnt. */       \
         spin_lock(&(_dom)->page_alloc_lock);                                \
         /* _dom holds an allocation reference */                            \
-        (_pfn)->u.inuse.count_info = PGC_allocated | 1;                     \
+        (_pfn)->u.inuse.count_info = PGC_allocated | 1;                      \
         if ( unlikely((_dom)->xenheap_pages++ == 0) )                        \
             get_domain(_dom);                                               \
         spin_unlock(&(_dom)->page_alloc_lock);                              \
@@ -111,14 +111,10 @@ struct pfn_info
 
 extern struct pfn_info *frame_table;
 extern unsigned long frame_table_size;
-extern struct list_head free_list;
-extern spinlock_t free_list_lock;
-extern unsigned int free_pfns;
 extern unsigned long max_page;
 
 void init_frametable(void *frametable_vstart, unsigned long nr_pages);
-void add_to_domain_alloc_list(unsigned long ps, unsigned long pe);
-struct pfn_info *alloc_domain_page(struct domain *p);
+struct pfn_info *alloc_domain_page(struct domain *d);
 void free_domain_page(struct pfn_info *page);
 
 int alloc_page_type(struct pfn_info *page, unsigned int type);
@@ -287,10 +283,10 @@ static inline int get_page_and_type(struct pfn_info *page,
     return rc;
 }
 
-#define ASSERT_PAGE_IS_TYPE(_p, _t)                            \
-    ASSERT(((_p)->u.inuse.type_info & PGT_type_mask) == (_t)); \
+#define ASSERT_PAGE_IS_TYPE(_p, _t)                           \
+    ASSERT(((_p)->u.inuse.type_info & PGT_type_mask) == (_t));\
     ASSERT(((_p)->u.inuse.type_info & PGT_count_mask) != 0)
-#define ASSERT_PAGE_IS_DOMAIN(_p, _d)                          \
+#define ASSERT_PAGE_IS_DOMAIN(_p, _d)                         \
     ASSERT(((_p)->u.inuse.count_info & PGC_count_mask) != 0);  \
     ASSERT((_p)->u.inuse.domain == (_d))
 
diff --git a/xen/include/asm-x86/page.h b/xen/include/asm-x86/page.h
index c7a8905064..d65295c802 100644
--- a/xen/include/asm-x86/page.h
+++ b/xen/include/asm-x86/page.h
@@ -102,6 +102,7 @@ typedef struct { unsigned long pt_lo; } pagetable_t;
 #define __pa(x)             ((unsigned long)(x)-PAGE_OFFSET)
 #define __va(x)             ((void *)((unsigned long)(x)+PAGE_OFFSET))
 #define page_address(_p)    (__va(((_p) - frame_table) << PAGE_SHIFT))
+#define phys_to_page(kaddr) (frame_table + ((kaddr) >> PAGE_SHIFT))
 #define virt_to_page(kaddr) (frame_table + (__pa(kaddr) >> PAGE_SHIFT))
 #define VALID_PAGE(page)    ((page - frame_table) < max_mapnr)
 
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index 409e5c4bd1..7133af933d 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -2,13 +2,28 @@
 #ifndef __XEN_MM_H__
 #define __XEN_MM_H__
 
-/* page_alloc.c */
-void init_page_allocator(unsigned long min, unsigned long max);
+#include <asm/mm.h>
+
+/* Generic allocator */
+unsigned long init_heap_allocator(
+    unsigned long bitmap_start, unsigned long max_pages);
+void init_heap_pages(int zone, struct pfn_info *pg, unsigned long nr_pages);
+struct pfn_info *alloc_heap_pages(int zone, int order);
+void free_heap_pages(int zone, struct pfn_info *pg, int order);
+
+/* Xen suballocator */
+void init_xenheap_pages(unsigned long ps, unsigned long pe);
 unsigned long alloc_xenheap_pages(int order);
 void free_xenheap_pages(unsigned long p, int order);
 #define alloc_xenheap_page()  (alloc_xenheap_pages(0))
 #define free_xenheap_page(_p) (free_xenheap_pages(_p,0))
 
-#include <asm/mm.h>
+/* Domain suballocator */
+void init_domheap_pages(unsigned long ps, unsigned long pe);
+struct pfn_info *alloc_domheap_pages(int order);
+void free_domheap_pages(struct pfn_info *pg, int order);
+unsigned long avail_domheap_pages(void);
+#define alloc_domheap_page()  (alloc_domheap_pages(0))
+#define free_domheap_page(_p) (free_domheap_pages(_p,0))
 
 #endif /* __XEN_MM_H__ */
-- 
2.30.2
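
For reference, here is how the pieces introduced by this changeset fit
together: init_heap_allocator() reserves the one-bit-per-page allocation
bitmap and returns the first free physical address after it, then
init_xenheap_pages() and init_domheap_pages() seed the MEMZONE_XEN and
MEMZONE_DOM zones of the shared binary buddy heap, which the two
suballocators draw from via alloc_heap_pages()/free_heap_pages(). The
sketch below is a minimal illustration only, not part of the changeset:
boot_sketch() is a made-up caller (the real sequence lives in cmain() in
xen/common/kernel.c and also hands back dom0's image stash region), and
error handling is pared down.

/* Illustrative sketch of the two-zone buddy API (not part of the patch).
 * boot_sketch() is hypothetical; all addresses here are physical unless
 * noted otherwise. */
static void boot_sketch(unsigned long heap_start,
                        unsigned long xenheap_phys_end,
                        unsigned long max_page)
{
    unsigned long v;
    struct pfn_info *pg;

    /* Bitmap first: one bit per page, everything initially "allocated". */
    heap_start = init_heap_allocator(heap_start, max_page);

    /* Xen's own heap is memguarded and dispensed as virtual addresses. */
    init_xenheap_pages(heap_start, xenheap_phys_end);

    /* The remainder of RAM becomes general-purpose domain memory. */
    init_domheap_pages(xenheap_phys_end, max_page << PAGE_SHIFT);

    v = alloc_xenheap_pages(2);        /* 2^2 = 4 contiguous pages */
    if ( v != 0 )
        free_xenheap_pages(v, 2);

    pg = alloc_domheap_page();         /* order-0 helper from xen/mm.h */
    if ( pg != NULL )
        free_domheap_page(pg);
}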